Moving from Mayavi to Matplotlib
A long-standing dependency of MotionClouds is MayaVi. While powerful, it is tedious to compile and may discourage new users. Here we show some attempts to achieve the same results with matplotlib.
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
Testing 3D plots in pylab¶
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
# Sanity check: 3D contour plotting with the stock mplot3d toolkit.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# Built-in demo surface shipped with mplot3d, sampled with step 0.05.
X, Y, Z = axes3d.get_test_data(0.05)
cset = ax.contour(X, Y, Z, cmap=cm.coolwarm)
ax.clabel(cset, fontsize=9, inline=1)
# Bare expression: in a notebook cell this displays the three arrays.
X, Y, Z
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib.pyplot as plt
import numpy as np

# Triangulated "pringle" (saddle) surface over a polar point set.
n_angles = 36
n_radii = 8
# An array of radii
# Does not include radius r=0, this is to eliminate duplicate points
radii = np.linspace(0.125, 1.0, n_radii)
# An array of angles
angles = np.linspace(0, 2*np.pi, n_angles, endpoint=False)
# Repeat all angles for each radius
angles = np.repeat(angles[..., np.newaxis], n_radii, axis=1)
# Convert polar (radii, angles) coords to cartesian (x, y) coords
# (0, 0) is added here. There are no duplicate points in the (x, y) plane
x = np.append(0, (radii*np.cos(angles)).flatten())
y = np.append(0, (radii*np.sin(angles)).flatten())
# Pringle surface
z = np.sin(-x*y)
fig = plt.figure()
# fig.gca(projection='3d') was deprecated in Matplotlib 3.4 and removed in
# 3.6; add_subplot is the supported way to request a 3D axes.
ax = fig.add_subplot(projection='3d')
ax.plot_trisurf(x, y, z, cmap=cm.jet, linewidth=0.2, alpha=.6)
"""
Contour plots of unstructured triangular grids.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.tri as tri
import numpy as np
import math
# First create the x and y coordinates of the points.
n_angles = 48
n_radii = 8
min_radius = 0.25
radii = np.linspace(min_radius, 0.95, n_radii)
angles = np.linspace(0, 2*math.pi, n_angles, endpoint=False)
angles = np.repeat(angles[...,np.newaxis], n_radii, axis=1)
angles[:,1::2] += math.pi/n_angles
x = (radii*np.cos(angles)).flatten()
y = (radii*np.sin(angles)).flatten()
z = (np.cos(radii)*np.cos(angles*3.0)).flatten()
# Create a custom triangulation
triang = tri.Triangulation(x, y)
# Mask off unwanted triangles.
#xmid = x[triang.triangles].mean(axis=1)
#ymid = y[triang.triangles].mean(axis=1)
#mask = np.where(xmid*xmid + ymid*ymid < min_radius*min_radius, 1, 0)
#triang.set_mask(mask)
plt.figure()
plt.gca(projection='3d')
plt.tricontour(triang, z)
from __future__ import print_function
"""
A very simple 'animation' of a 3D plot
"""
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
import numpy as np
import time
def generate(X, Y, phi):
    """Radially damped cosine wave: the amplitude of cos(2*pi*X + phi)
    falls off linearly with distance from the origin."""
    damping = 1 - np.sqrt(X ** 2 + Y ** 2)
    return damping * np.cos(phi + 2 * np.pi * X)
# Simple wireframe animation: redraw the surface for a sweep of phases.
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')

xs = np.linspace(-1, 1, 50)
ys = np.linspace(-1, 1, 50)
X, Y = np.meshgrid(xs, ys)
Z = generate(X, Y, 0.0)

wframe = None
tstart = time.time()
for phi in np.linspace(0, 360 / 2 / np.pi, 100):
    oldcol = wframe
    Z = generate(X, Y, phi)
    wframe = ax.plot_wireframe(X, Y, Z, rstride=2, cstride=2)
    # Remove old line collection before drawing the new frame.
    # (ax.collections.remove(oldcol) is rejected by Matplotlib >= 3.7, where
    # Axes.collections is immutable; calling .remove() on the artist itself
    # is the supported spelling.)
    if oldcol is not None:
        oldcol.remove()
    plt.pause(.001)
print('FPS: %f' % (100 / (time.time() - tstart)))
However, there is not yet an easy way to produce a contour plot of a scalar field, something that MayaVi does very well.
Testing the utility functions of Motion Clouds
Motion Clouds utilities
Here, we test some of the utilities that are delivered with the MotionClouds package.
import MotionClouds as mc
progressbar
Wrapper around pyprind.
### PERCENTAGE INDICATOR EXAMPLE ###
# Demo of the bundled percentage progress indicator (a wrapper around pyprind).
n = 10000
my_prperc = mc.progressbar.ProgPercent(n)  # 1) init. with number of iterations
for i in range(n):
    # do some computation
    my_prperc.update()  # 2) advance the indicator once per iteration
Handling filenames¶
By default, the folder for generating figures or data is mc.figpath:
# Show the default output folder for generated figures/data.
# (print() form: the Python-2 print statement is a SyntaxError under
# Python 3 and after `from __future__ import print_function`.)
print(mc.figpath)
To generate figures, we assign file names, such as:
# Build the full path for a figure called 'color' inside mc.figpath.
name = 'color'
import os
filename = os.path.join(mc.figpath, name)
It is then possible to check if that figures exist:
# Report the target path, whether the animation already exists, and whether
# recomputation is forced (Python-3-compatible print call).
print(filename, mc.check_if_anim_exist(filename), mc.recompute)
Importing MayaVi¶
# Show the docstring of the MayaVi import helper (Python-3-compatible print).
print(mc.import_mayavi.__doc__)
Mayavi is difficult to compile on some architectures (think Mac OS X), so we allowed the possibility of an ImportError:
# mc.MAYAVI records whether the optional MayaVi backend could be imported.
print(mc.MAYAVI)
mc.import_mayavi()
To force MayaVi to not be imported, set:
# mc.MAYAVI = 'Do NOT Import'
# mc.import_mayavi()
exporting to various formats¶
It is possible to export motion clouds to many different formats. Here are some examples:
# Export one motion cloud once per supported container/codec.
name = 'export'
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
z = mc.rectif(mc.random_cloud(mc.envelope_gabor(fx, fy, ft)))
mc.PROGRESS = False
for vext in mc.SUPPORTED_FORMATS:
    # print() call form: the Python-2 print statement is invalid here.
    print('Exporting to format: ', vext)
    mc.anim_save(z, os.path.join(mc.figpath, name), display=False, vext=vext, verbose=False)
showing a video¶
To show a video in a notebook, issue:
# When mc.notebook is True, videos are embedded inline in the notebook.
mc.notebook = True # True by default
mc.in_show_video('export')
Rectifying the contrast¶
The mc.rectif function makes it possible to rectify the amplitude of luminance values within the whole generated texture to lie between $0$ and $1$:
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
color = mc.envelope_color(fx, fy, ft)
envelope = color * mc.envelope_gabor(fx, fy, ft)
image = mc.random_cloud(envelope)
# Statistics before rectification (print() form for Python 3 compatibility)...
print('Min :', image.min(), ', mean: ', image.mean(), ', max: ', image.max())
image = mc.rectif(image)
# ... and after: values are rescaled into [0, 1].
print('Min :', image.min(), ', mean: ', image.mean(), ', max: ', image.max())
Generating figures¶
It is easy to generate figures, either directly from a set of parameters for the Motion Clouds (with mc.figure_MC) or from an arbitrary envelope (with mc.figure):
# Generate the standard figure set directly from Motion Cloud parameters.
name = 'test_figures_MC'
#initialize
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
mc.figures_MC(fx, fy, ft, name)
mc.in_show_video(name)
# Same figures, but starting from an explicitly constructed envelope.
name = 'test_figures'
#initialize
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
color = mc.envelope_color(fx, fy, ft)
z = color * mc.envelope_gabor(fx, fy, ft)
mc.figures(z, name)
mc.in_show_video(name)
There are also two specific functions to generate the mayavi visualizations: cube and visualize.
Testing the different components of the envelope
Motion Clouds: testing components of the envelope¶
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
import MotionClouds as mc
# Frequency grids shared by all of the envelope tests below.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
#mc.recompute = True
mc.notebook = True
Testing the speed¶
Here the link to the test page for the component Speed
# Envelope for the speed component on its own.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
color = mc.envelope_color(fx, fy, ft)
name = 'speed'
z = color*mc.envelope_speed(fx, fy, ft)
mc.figures(z, name)
mc.show_video2(name)
Exploring the orientation component of the envelope around a grating.¶
Here the link to the test page for the component Grating
# Gabor envelope: orientation-selective band around a grating.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
color = mc.envelope_color(fx, fy, ft)
name = 'grating'
z = color * mc.envelope_gabor(fx, fy, ft)
mc.figures(z, name)
mc.show_video2(name)
Here the link to the test page for the component Radial
# Radial envelope with a narrow orientation bandwidth.
B_theta = np.pi/8.
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
name = 'radial'
mc.figures_MC(fx, fy, ft, name, B_theta=B_theta)
# (A stray `verbose = False` assignment was removed: the local was never
# read or passed to any call, so it had no effect.)
mc.show_video2(name)
Testing the color¶
Here the link to the test page for the component Color
# The colour envelope on its own, without speed/orientation shaping.
name = 'color'
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
z = mc.envelope_color(fx, fy, ft)
mc.figures(z, name)
mc.show_video2(name)
Testing competing Motion Clouds with psychopy
Analysis of a Discrimination Experiment¶
In the psychopy_competition.py script, we implemented an experiment to test whether one could discriminate the dominating motion in a sum of two motion clouds in opposite directions.
Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
Analysis, version 1¶
In a first version of the experiment, we only stored the results in a numpy file:
!ls ../data/competing_v1_*npy
import glob
fig = plt.figure()
ax = fig.add_subplot(111)
for fn in glob.glob('../data/competing_v1_*npy'):
results = np.load(fn)
print 'experiment ', fn, ', # trials=', results.shape[1]
ax.scatter(results[1, :], results[0, :])
_ = ax.axis([0., 1., -1.1, 1.1])
_ = ax.set_xlabel('contrast')
_ = ax.set_ylabel('perceived direction')
# Split each session's trials by which cloud was stronger (contrast below or
# above .5) and whether the reported direction matched.
# Assumes results[1, :] holds the contrast in [0, 1] and results[0, :] the
# response coded as -1 (lower) / +1 (upper) -- consistent with the scatter
# plot above; TODO confirm against the psychopy_competition.py script.
alpha = .3
fig = plt.figure(figsize=(12,5))
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)
data = []
for fn in glob.glob('../data/competing_v1_*npy'):
    results = np.load(fn)
    data_ = np.empty(results.shape)
    # lower stronger, detected lower = CORRECT is 1
    ax1.scatter(results[1, results[1, :]<.5],
                1.*(results[0, results[1, :]<.5]==-1),
                c='r', alpha=alpha)
    # copying data: contrast (from .5 to 1), correct
    data_[0, results[1, :]<.5] = 1. - results[1, results[1, :]<.5]
    data_[1, results[1, :]<.5] = 1.*(results[0, results[1, :]<.5]==-1)
    # upper stronger, detected lower = INCORRECT is 1
    ax1.scatter(results[1, results[1, :]>.5],
                1.*(results[0, results[1, :]>.5]==-1),
                c='b', alpha=alpha)
    # lower stronger, detected upper = INCORRECT is 1
    ax2.scatter(results[1, results[1, :]<.5],
                1.*(results[0, results[1, :]<.5]==1),
                c='b', alpha=alpha, marker='x')
    # upper stronger, detected upper = CORRECT is 1
    ax2.scatter(results[1, results[1, :]>.5],
                1.*(results[0, results[1, :]>.5]==1),
                c='r', alpha=alpha, marker='x')
    # copying data: contrast (from .5 to 1), correct
    data_[0, results[1, :]>=.5] = results[1, results[1, :]>=.5]
    data_[1, results[1, :]>=.5] = 1.*(results[0, results[1, :]>=.5]==1)
    data.append(data_)
for ax in [ax1, ax2]:
    ax.axis([0., 1., -.1, 1.1])
    _ = ax.set_xlabel('contrast')
_ = ax1.set_ylabel('detected lower')
_ = ax1.legend(['lower', 'upper'], loc='right')
_ = ax2.set_ylabel('detected upper')
_ = ax2.legend(['lower', 'upper'], loc='right')
(note the subplots are equivalent up to a flip)
One could not fit psychometric curves for this 2AFC. See for instance https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3110332/
let's explore another way:
import numpy as np
import pylab
from scipy.optimize import curve_fit
def sigmoid(c, c0, k):
    """Logistic psychometric curve centred at c0 with slope parameter k."""
    return 1.0 / (1.0 + np.exp(k * (c0 - c)))
# Fit the logistic curve per session and overlay data + fit.
for fn in glob.glob('../data/competing_v1_*npy'):
    results = np.load(fn)
    # Map responses from {-1, 1} to {0, 1} so the sigmoid can be fit.
    cdata, ydata = results[1, :], .5*results[0, : ]+.5
    pylab.plot(cdata, ydata, 'o')
    popt, pcov = curve_fit(sigmoid, cdata, ydata)
    # print() call form: the Python-2 print statement is invalid here.
    print(popt)
    c = np.linspace(0, 1, 50)
    y = sigmoid(c, *popt)
    pylab.plot(c, y)
pylab.ylim(-.05, 1.05)
pylab.legend(loc='best')
Analysis, version 2¶
In the second version, we also stored some information about the experiment
from psychopy import misc
# Same fit as version 1, but session metadata is now stored alongside the
# results in a pickle file; collect the fitted mean/slope across sessions.
mean, slope = [], []
for fn in glob.glob('../data/competing_v2_*npy'):
    results = np.load(fn)
    data = misc.fromFile(fn.replace('npy', 'pickle'))
    # print() call form: the Python-2 print statement is invalid here.
    print(data)
    cdata, ydata = results[1, :], .5*results[0, : ]+.5
    pylab.plot(cdata, ydata, 'o')
    popt, pcov = curve_fit(sigmoid, cdata, ydata)
    mean.append(popt[0])
    slope.append(popt[1])
    c = np.linspace(0, 1, 50)
    y = sigmoid(c, *popt)
    pylab.plot(c, y)
pylab.text(0.05, 0.8, 'mean : %0.2f , slope : %0.2f ' %(np.mean(mean), np.mean(slope)))
pylab.ylim(-.05, 1.05)
pylab.legend(loc='best')
Testing discrimination between Motion Clouds with psychopy
Analysis of a discrimination experiment¶
In the psychopy_discrimination.py script, we implemented an experiment to test whether the overall shape of motion clouds could influence discrimination of speed.
Herein, we analyse the data that was collected over different sessions and try to draw some conclusions.
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
The experiment¶
#%pycat psychopy_discrimination.py
Analysis - version 1¶
In a first version of the experiment, we only stored the results in a numpy file.
!ls ../data/discriminating_*
!ls ../data/
import glob
for fn in glob.glob('../data/discriminating_*npy'):
results = np.load(fn)
print fn, results.shape
print('analyzing results')
print '# of trials :', np.abs(results).sum(axis=1)
print 'average results: ', (results.sum(axis=1)/np.abs(results).sum(axis=1)*.5+.5)
%whos
Another solution is to use:
#data= 1
#np.savez('toto', data=data, results=results)
#print np.load('toto.npz')['data']
or directly psychopy.misc:
from psychopy import misc
#info = misc.fromFile('../data/discriminating.pickle')
alphas = [-1., -.5, 0., 0.5, 1., 1.5, 2.]
# NOTE(review): 'info' and 'results' must already be defined for the lines
# below; with the misc.fromFile line commented out this cell raises a
# NameError unless an earlier cell bound them -- confirm intended run order.
fileName = '../data/discriminating_' + info['observer'] + '.pickle'
info['alphas'] = alphas
info['results'] = results
#numpy.savez(fileName, results=results, alphas=alphas)
misc.toFile(fileName, info)
Analysis - version 2¶
In the second version, we stored more data:
import glob
from psychopy import misc
fig = plt.figure()
ax = fig.add_subplot(111)
# Per-session bar plot of % correct as a function of alpha.
for fn in glob.glob('../data/discriminating_v2_*pickle'):
    data = misc.fromFile(fn)
    # Print this session's stored results. (The original printed the stale
    # module-level 'results' left over from an earlier cell instead of the
    # data just loaded; it also used Python-2 print statements.)
    print(fn, data['results'].shape)
    print('analyzing results')
    alphas = np.array(data['alphas'])
    print(' alphas = ', alphas)
    print('# of trials :', np.abs(data['results']).sum(axis=1))
    print('average results: ', (data['results'].sum(axis=1)/np.abs(data['results']).sum(axis=1)*.5+.5))
    width = (alphas.max()-alphas.min())/len(alphas)
    ax.bar(data['alphas'] - width/2, (data['results'].sum(axis=1)/np.abs(data['results']).sum(axis=1)*.5+.5), width=width, alpha=.3 )
    # for i_trial in xrange(data['results'].shape[1]):
    #     ax.scatter(data['alphas'][data['results']
ax.set_xlabel('alpha')
ax.set_ylabel('% correct')
# TODO : correct for the number of trials / make global reading / make a new draw of alphas
# TODO : test f_0
Testing basic functions of Motion Clouds
Motion Clouds: raw principles¶
Motion Clouds are synthesized textures which aim at having similar characteristics as natural images but with controlled parameters. There are many ways to achieve these results and this notebook aims at showing that different procedures from different communities (neuroscience, modelling, computer vision, ...) may produce similar results.
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import pylab
import matplotlib.pyplot as plt
%matplotlib inline
Using Fourier ("official Motion Clouds")¶
# Make the parent folder importable so the in-tree MotionClouds package is found.
import sys
sys.path.append('..')
import MotionClouds as mc
fx, fy, ft = mc.get_grids(mc.N_X, mc.N_Y, mc.N_frame)
Using mixtures of images¶
# Load a reference photograph and normalise it to zero mean, unit variance.
# scipy.misc.lena() was removed in SciPy 1.0 (and scipy.misc itself in 1.12);
# fall back to the bundled 'ascent' photo so the demo keeps running.
try:
    import scipy.misc
    lena = scipy.misc.lena() * 1.
except (ImportError, AttributeError):
    # scipy.datasets.ascent needs the optional 'pooch' package on SciPy >= 1.10.
    import scipy.datasets
    lena = scipy.datasets.ascent() * 1.
lena -= lena.mean()
lena /= lena.std()
print(lena.shape)
plt.imshow(lena, cmap=plt.cm.gray)
# Bare expressions: displayed in a notebook cell.
lena.shape
lena[0, :]
def noise(image=lena):
    """Return *image* circularly shifted by a random offset along each axis.

    The torus-shift preserves the image statistics while decorrelating it
    from the original.
    """
    shifted = image
    for ax in (0, 1):
        offset = np.random.randint(shifted.shape[ax])
        shifted = np.roll(shifted, offset, axis=ax)
    return shifted
# Two independent draws: each call shows a different random shift of the image.
plt.imshow(noise(), cmap=plt.cm.gray)
plt.imshow(noise(), cmap=plt.cm.gray)
Using ARMA processes¶
Now, we define the ARMA process as an averaging process with a certain time constant $\tau=30.$ (in frames).
def ARMA(image, tau=30.):
    """One step of a leaky (AR(1)) average: blend *image* with a fresh noise frame.

    tau is the integration time constant in frames; larger tau means the
    texture drifts away from the current image more slowly.
    """
    # Explicit float division so that passing an integer tau cannot trigger
    # integer division (which would zero the update weight) under Python 2.
    leak = 1.0 / tau
    return (1.0 - leak) * image + leak * noise()
initializing
# Initialise with one ARMA step from the normalised image...
image = ARMA(lena)
plt.imshow(image, cmap=plt.cm.gray)
# ...then iterate in batches; successive snapshots show the frame
# progressively decorrelating from the starting image.
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
for _ in range(1000): image = ARMA(image)
plt.imshow(image, cmap=plt.cm.gray)
# NOTE(review): '%cd..' relies on IPython accepting the no-space form;
# '%cd ..' is the usual spelling -- confirm it works in the target IPython.
%cd..
# Build a full movie: each frame is one ARMA step applied to the previous one.
N_frame = 1024
z = np.zeros((lena.shape[0], lena.shape[1], N_frame))
z[:, :, 0] = image
for i_frame in range(1, N_frame):
    z[:, :, i_frame] = ARMA(z[:, :, i_frame-1])
# .5 + .5*z maps roughly zero-centred frames towards the [0, 1] range
# expected for export -- assumes |z| stays near or below 1; TODO confirm.
mc.anim_save(.5 + .5*z, filename='results/arma')
mc.notebook = True
mc.in_show_video(name='arma')